Importação das bibliotecas

In [112]:
import pandas as pd
import pandas_profiling
import numpy as np
import seaborn as sns
import sklearn
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn import model_selection
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.preprocessing import StandardScaler
from sklearn.dummy import DummyClassifier
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from yellowbrick.classifier import ConfusionMatrix
from yellowbrick.classifier import ROCAUC
import sklearn.metrics
from sklearn.metrics import confusion_matrix, roc_auc_score

Importação dos dados do Titanic

In [139]:
# Load the raw Titanic dataset (891 rows x 12 columns).
# NOTE(review): hardcoded absolute local path — prefer a configurable
# DATA_DIR / relative path so the notebook runs on other machines.
df = pd.read_csv(r'C:\Users\gebra\Desktop\DS\Dataset\Titanic\titanic.csv')

Visualização dos dados do Titanic

In [140]:
# Preview the first rows of the raw data.
df.head()
Out[140]:
PassengerId Survived Pclass Name Sex Age SibSp Parch Ticket Fare Cabin Embarked
0 1 0 3 Braund, Mr. Owen Harris male 22.0 1 0 A/5 21171 7.2500 NaN S
1 2 1 1 Cumings, Mrs. John Bradley (Florence Briggs Th... female 38.0 1 0 PC 17599 71.2833 C85 C
2 3 1 3 Heikkinen, Miss. Laina female 26.0 0 0 STON/O2. 3101282 7.9250 NaN S
3 4 1 1 Futrelle, Mrs. Jacques Heath (Lily May Peel) female 35.0 1 0 113803 53.1000 C123 S
4 5 0 3 Allen, Mr. William Henry male 35.0 0 0 373450 8.0500 NaN S
In [141]:
# Dtypes and non-null counts; Age, Cabin and Embarked have missing values.
df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 891 entries, 0 to 890
Data columns (total 12 columns):
 #   Column       Non-Null Count  Dtype  
---  ------       --------------  -----  
 0   PassengerId  891 non-null    int64  
 1   Survived     891 non-null    int64  
 2   Pclass       891 non-null    int64  
 3   Name         891 non-null    object 
 4   Sex          891 non-null    object 
 5   Age          714 non-null    float64
 6   SibSp        891 non-null    int64  
 7   Parch        891 non-null    int64  
 8   Ticket       891 non-null    object 
 9   Fare         891 non-null    float64
 10  Cabin        204 non-null    object 
 11  Embarked     889 non-null    object 
dtypes: float64(2), int64(5), object(5)
memory usage: 83.7+ KB
In [5]:
# (rows, columns) of the raw frame.
df.shape
Out[5]:
(891, 12)
In [6]:
# Automated EDA report for every column.
# NOTE(review): pandas_profiling was renamed/deprecated upstream in favor of
# ydata-profiling — confirm the installed version still provides this API.
pandas_profiling.ProfileReport(df)



Out[6]:

In [7]:
# Summary statistics for the numeric columns.
df.describe()
Out[7]:
PassengerId Survived Pclass Age SibSp Parch Fare
count 891.000000 891.000000 891.000000 714.000000 891.000000 891.000000 891.000000
mean 446.000000 0.383838 2.308642 29.699118 0.523008 0.381594 32.204208
std 257.353842 0.486592 0.836071 14.526497 1.102743 0.806057 49.693429
min 1.000000 0.000000 1.000000 0.420000 0.000000 0.000000 0.000000
25% 223.500000 0.000000 2.000000 20.125000 0.000000 0.000000 7.910400
50% 446.000000 0.000000 3.000000 28.000000 0.000000 0.000000 14.454200
75% 668.500000 1.000000 3.000000 38.000000 1.000000 0.000000 31.000000
max 891.000000 1.000000 3.000000 80.000000 8.000000 6.000000 512.329200

Informação sobre dados categóricos

In [142]:
# Class balance of Sex (dropna=False would also surface NaNs; none appear).
df.Sex.value_counts(dropna=False)
Out[142]:
male      577
female    314
Name: Sex, dtype: int64
In [143]:
# Ticket has 681 distinct values — too high-cardinality to encode usefully.
df.Ticket.value_counts(dropna=False)
Out[143]:
CA. 2343              7
347082                7
1601                  7
347088                6
CA 2144               6
                     ..
SOTON/O.Q. 3101305    1
2628                  1
PP 4348               1
12460                 1
11752                 1
Name: Ticket, Length: 681, dtype: int64
In [144]:
# Cabin is mostly missing (687 of 891 NaN).
df.Cabin.value_counts(dropna=False)
Out[144]:
NaN            687
B96 B98          4
C23 C25 C27      4
G6               4
F33              3
              ... 
C111             1
A19              1
C87              1
C148             1
B3               1
Name: Cabin, Length: 148, dtype: int64
In [145]:
# Embarked: three ports (S, C, Q) plus 2 missing values.
df.Embarked.value_counts(dropna=False)
Out[145]:
S      644
C      168
Q       77
NaN      2
Name: Embarked, dtype: int64

Aquisição do título a partir do nome dos indivíduos

In [146]:
# Everything after the first comma of Name, e.g. " Mr. Owen Harris".
nome = df["Name"].str.split(",").str[1]
In [147]:
# Keep only the title (text before the first '.'), e.g. " Mr", " Mrs".
# NOTE: the leading space from the comma split is preserved, which is why
# the dummy columns later are named 'Nome_ Mr' etc.; .str.strip() would
# clean this up if the renaming is acceptable downstream.
df['Nome'] = nome.str.split(".").str.get(0)
In [148]:
# Frequency of each extracted title.
df['Nome'].value_counts()
Out[148]:
 Mr              517
 Miss            182
 Mrs             125
 Master           40
 Dr                7
 Rev               6
 Mlle              2
 Col               2
 Major             2
 Don               1
 Lady              1
 Sir               1
 Capt              1
 the Countess      1
 Ms                1
 Jonkheer          1
 Mme               1
Name: Nome, dtype: int64

Remoção de colunas que não serão utilizadas

In [149]:
 df = df.drop(columns=[
     "PassengerId",
     "Ticket",
     "Name",
     "Cabin"])
In [150]:
# Confirm the identifier columns were dropped.
df.head()
Out[150]:
Survived Pclass Sex Age SibSp Parch Fare Embarked Nome
0 0 3 male 22.0 1 0 7.2500 S Mr
1 1 1 female 38.0 1 0 71.2833 C Mrs
2 1 3 female 26.0 0 0 7.9250 S Miss
3 1 1 female 35.0 1 0 53.1000 S Mrs
4 0 3 male 35.0 0 0 8.0500 S Mr

Transformação dos dados categoricos

In [151]:
# One-hot encode the remaining object columns (Sex, Embarked, Nome).
df = pd.get_dummies(df)
In [152]:
# Inspect the encoded column names.
df.columns
Out[152]:
Index(['Survived', 'Pclass', 'Age', 'SibSp', 'Parch', 'Fare', 'Sex_female',
       'Sex_male', 'Embarked_C', 'Embarked_Q', 'Embarked_S', 'Nome_ Capt',
       'Nome_ Col', 'Nome_ Don', 'Nome_ Dr', 'Nome_ Jonkheer', 'Nome_ Lady',
       'Nome_ Major', 'Nome_ Master', 'Nome_ Miss', 'Nome_ Mlle', 'Nome_ Mme',
       'Nome_ Mr', 'Nome_ Mrs', 'Nome_ Ms', 'Nome_ Rev', 'Nome_ Sir',
       'Nome_ the Countess'],
      dtype='object')
In [153]:
# Preview the fully numeric, one-hot encoded frame (28 columns).
df.head()
Out[153]:
Survived Pclass Age SibSp Parch Fare Sex_female Sex_male Embarked_C Embarked_Q ... Nome_ Master Nome_ Miss Nome_ Mlle Nome_ Mme Nome_ Mr Nome_ Mrs Nome_ Ms Nome_ Rev Nome_ Sir Nome_ the Countess
0 0 3 22.0 1 0 7.2500 0 1 0 0 ... 0 0 0 0 1 0 0 0 0 0
1 1 1 38.0 1 0 71.2833 1 0 1 0 ... 0 0 0 0 0 1 0 0 0 0
2 1 3 26.0 0 0 7.9250 1 0 0 0 ... 0 1 0 0 0 0 0 0 0 0
3 1 1 35.0 1 0 53.1000 1 0 0 0 ... 0 0 0 0 0 1 0 0 0 0
4 0 3 35.0 0 0 8.0500 0 1 0 0 ... 0 0 0 0 1 0 0 0 0 0

5 rows × 28 columns

In [154]:
# All columns are now numeric (float64/int64/uint8); only Age still has NaNs.
df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 891 entries, 0 to 890
Data columns (total 28 columns):
 #   Column              Non-Null Count  Dtype  
---  ------              --------------  -----  
 0   Survived            891 non-null    int64  
 1   Pclass              891 non-null    int64  
 2   Age                 714 non-null    float64
 3   SibSp               891 non-null    int64  
 4   Parch               891 non-null    int64  
 5   Fare                891 non-null    float64
 6   Sex_female          891 non-null    uint8  
 7   Sex_male            891 non-null    uint8  
 8   Embarked_C          891 non-null    uint8  
 9   Embarked_Q          891 non-null    uint8  
 10  Embarked_S          891 non-null    uint8  
 11  Nome_ Capt          891 non-null    uint8  
 12  Nome_ Col           891 non-null    uint8  
 13  Nome_ Don           891 non-null    uint8  
 14  Nome_ Dr            891 non-null    uint8  
 15  Nome_ Jonkheer      891 non-null    uint8  
 16  Nome_ Lady          891 non-null    uint8  
 17  Nome_ Major         891 non-null    uint8  
 18  Nome_ Master        891 non-null    uint8  
 19  Nome_ Miss          891 non-null    uint8  
 20  Nome_ Mlle          891 non-null    uint8  
 21  Nome_ Mme           891 non-null    uint8  
 22  Nome_ Mr            891 non-null    uint8  
 23  Nome_ Mrs           891 non-null    uint8  
 24  Nome_ Ms            891 non-null    uint8  
 25  Nome_ Rev           891 non-null    uint8  
 26  Nome_ Sir           891 non-null    uint8  
 27  Nome_ the Countess  891 non-null    uint8  
dtypes: float64(2), int64(4), uint8(22)
memory usage: 61.0 KB
In [163]:
# Sex_female already encodes the passenger's sex (it is the complement of
# Sex_male), so the redundant Sex_male column is removed.

df = df.drop(columns="Sex_male")
In [164]:
# NOTE(review): at this point df has no object-dtype columns left (see the
# df.info() output above), so this second get_dummies call is a no-op and
# could be removed.
df = pd.get_dummies(df, drop_first=True)
In [165]:
# Final feature/target column set (27 columns).
df.columns
Out[165]:
Index(['Survived', 'Pclass', 'Age', 'SibSp', 'Parch', 'Fare', 'Sex_female',
       'Embarked_C', 'Embarked_Q', 'Embarked_S', 'Nome_ Capt', 'Nome_ Col',
       'Nome_ Don', 'Nome_ Dr', 'Nome_ Jonkheer', 'Nome_ Lady', 'Nome_ Major',
       'Nome_ Master', 'Nome_ Miss', 'Nome_ Mlle', 'Nome_ Mme', 'Nome_ Mr',
       'Nome_ Mrs', 'Nome_ Ms', 'Nome_ Rev', 'Nome_ Sir',
       'Nome_ the Countess'],
      dtype='object')

Preparar e dividir os dados em Treino e Teste

In [166]:
# Target: survival indicator (0 = died, 1 = survived).
y = df.Survived
In [167]:
# Feature matrix: every column except the target.
X = df.drop(columns="Survived")
In [168]:
# 70/30 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.3, random_state=42)
In [169]:
# Names of the numeric/binary feature columns.
# (Fix: the original cell contained pasted REPL continuation prompts
# "... " on every line, which is not valid Python source.)
# NOTE(review): this list does not appear to be used by any later cell —
# candidate for removal.
num_cols = [
    "Pclass",
    "SibSp",
    "Parch",
    "Fare",
    "Age",
    "Sex_female",
]
In [170]:
# Inspect the training features (623 rows; Age still contains NaNs here).
X_train
Out[170]:
Pclass Age SibSp Parch Fare Sex_female Embarked_C Embarked_Q Embarked_S Nome_ Capt ... Nome_ Master Nome_ Miss Nome_ Mlle Nome_ Mme Nome_ Mr Nome_ Mrs Nome_ Ms Nome_ Rev Nome_ Sir Nome_ the Countess
445 1 4.0 0 2 81.8583 0 0 0 1 0 ... 1 0 0 0 0 0 0 0 0 0
650 3 NaN 0 0 7.8958 0 0 0 1 0 ... 0 0 0 0 1 0 0 0 0 0
172 3 1.0 1 1 11.1333 1 0 0 1 0 ... 0 1 0 0 0 0 0 0 0 0
450 2 36.0 1 2 27.7500 0 0 0 1 0 ... 0 0 0 0 1 0 0 0 0 0
314 2 43.0 1 1 26.2500 0 0 0 1 0 ... 0 0 0 0 1 0 0 0 0 0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
106 3 21.0 0 0 7.6500 1 0 0 1 0 ... 0 1 0 0 0 0 0 0 0 0
270 1 NaN 0 0 31.0000 0 0 0 1 0 ... 0 0 0 0 1 0 0 0 0 0
860 3 41.0 2 0 14.1083 0 0 0 1 0 ... 0 0 0 0 1 0 0 0 0 0
435 1 14.0 1 2 120.0000 1 0 0 1 0 ... 0 1 0 0 0 0 0 0 0 0
102 1 21.0 0 1 77.2875 0 0 0 1 0 ... 0 0 0 0 1 0 0 0 0 0

623 rows × 26 columns

Tratamento de valores ausentes

In [171]:
# Missing values per training column: only Age (124 NaNs).
X_train.isnull().sum()
Out[171]:
Pclass                  0
Age                   124
SibSp                   0
Parch                   0
Fare                    0
Sex_female              0
Embarked_C              0
Embarked_Q              0
Embarked_S              0
Nome_ Capt              0
Nome_ Col               0
Nome_ Don               0
Nome_ Dr                0
Nome_ Jonkheer          0
Nome_ Lady              0
Nome_ Major             0
Nome_ Master            0
Nome_ Miss              0
Nome_ Mlle              0
Nome_ Mme               0
Nome_ Mr                0
Nome_ Mrs               0
Nome_ Ms                0
Nome_ Rev               0
Nome_ Sir               0
Nome_ the Countess      0
dtype: int64
In [172]:
# Missing values per test column: only Age (53 NaNs).
X_test.isnull().sum()
Out[172]:
Pclass                 0
Age                   53
SibSp                  0
Parch                  0
Fare                   0
Sex_female             0
Embarked_C             0
Embarked_Q             0
Embarked_S             0
Nome_ Capt             0
Nome_ Col              0
Nome_ Don              0
Nome_ Dr               0
Nome_ Jonkheer         0
Nome_ Lady             0
Nome_ Major            0
Nome_ Master           0
Nome_ Miss             0
Nome_ Mlle             0
Nome_ Mme              0
Nome_ Mr               0
Nome_ Mrs              0
Nome_ Ms               0
Nome_ Rev              0
Nome_ Sir              0
Nome_ the Countess     0
dtype: int64
In [173]:
# Impute missing values with the TRAINING-set medians only, and apply the
# same values to the test set — this avoids leaking test-set information
# into the imputation.
meds = X_train.median()
X_train = X_train.fillna(meds)
X_test = X_test.fillna(meds)
In [174]:
# Verify: no missing values remain in the training set.
X_train.isnull().sum()
Out[174]:
Pclass                0
Age                   0
SibSp                 0
Parch                 0
Fare                  0
Sex_female            0
Embarked_C            0
Embarked_Q            0
Embarked_S            0
Nome_ Capt            0
Nome_ Col             0
Nome_ Don             0
Nome_ Dr              0
Nome_ Jonkheer        0
Nome_ Lady            0
Nome_ Major           0
Nome_ Master          0
Nome_ Miss            0
Nome_ Mlle            0
Nome_ Mme             0
Nome_ Mr              0
Nome_ Mrs             0
Nome_ Ms              0
Nome_ Rev             0
Nome_ Sir             0
Nome_ the Countess    0
dtype: int64
In [175]:
# Verify: no missing values remain in the test set.
X_test.isnull().sum()
Out[175]:
Pclass                0
Age                   0
SibSp                 0
Parch                 0
Fare                  0
Sex_female            0
Embarked_C            0
Embarked_Q            0
Embarked_S            0
Nome_ Capt            0
Nome_ Col             0
Nome_ Don             0
Nome_ Dr              0
Nome_ Jonkheer        0
Nome_ Lady            0
Nome_ Major           0
Nome_ Master          0
Nome_ Miss            0
Nome_ Mlle            0
Nome_ Mme             0
Nome_ Mr              0
Nome_ Mrs             0
Nome_ Ms              0
Nome_ Rev             0
Nome_ Sir             0
Nome_ the Countess    0
dtype: int64

Normalização dos dados

In [176]:
# Standardize features: fit on the training set only, then apply the same
# transform to the test set (no leakage). Note this also scales the 0/1
# dummy columns and converts both sets to NumPy arrays.
sca = preprocessing.StandardScaler()
X_train = sca.fit_transform(X_train)
X_test = sca.transform(X_test)
In [177]:
# Scaled training matrix (now a NumPy array, not a DataFrame).
X_train
Out[177]:
array([[-1.63788124, -1.91971935, -0.47416141, ..., -0.08038679,
         0.        , -0.04009635],
       [ 0.80326712, -0.0772525 , -0.47416141, ..., -0.08038679,
         0.        , -0.04009635],
       [ 0.80326712, -2.15002771,  0.34868694, ..., -0.08038679,
         0.        , -0.04009635],
       ...,
       [ 0.80326712,  0.92075038,  1.17153529, ..., -0.08038679,
         0.        , -0.04009635],
       [-1.63788124, -1.15202483,  0.34868694, ..., -0.08038679,
         0.        , -0.04009635],
       [-1.63788124, -0.61463866, -0.47416141, ..., -0.08038679,
         0.        , -0.04009635]])
In [178]:
# Scaled test matrix.
X_test
Out[178]:
array([[ 0.80326712, -0.0772525 ,  0.34868694, ..., -0.08038679,
         0.        , -0.04009635],
       [-0.41730706,  0.15305586, -0.47416141, ..., -0.08038679,
         0.        , -0.04009635],
       [ 0.80326712, -0.69140812, -0.47416141, ..., -0.08038679,
         0.        , -0.04009635],
       ...,
       [-1.63788124,  2.76321724, -0.47416141, ..., -0.08038679,
         0.        , -0.04009635],
       [ 0.80326712, -0.92171647, -0.47416141, ..., -0.08038679,
         0.        , -0.04009635],
       [-1.63788124, -0.0772525 , -0.47416141, ..., -0.08038679,
         0.        , -0.04009635]])

Treinamento e avaliação dos modelos

In [179]:
# Classifier classes to compare (each is instantiated with defaults below).
modelos = [DummyClassifier,
    LogisticRegression,
    DecisionTreeClassifier,
    KNeighborsClassifier,
    GaussianNB,
    RandomForestClassifier,
    SVC,
    ]
In [180]:
# Display names for the models, derived from the classes themselves so the
# two lists can never drift out of sync. The resulting values are identical
# to the previous hand-written list ("DummyClassifier", ..., "SVC").
modelos2 = [modelo.__name__ for modelo in modelos]
In [181]:
# Train each model with default hyperparameters and report test-set accuracy.
# zip pairs each class with its display name, replacing the error-prone
# manual counter k; the printed output is unchanged.
for nome_modelo, modelo in zip(modelos2, modelos):
    clf = modelo()
    clf.fit(X_train, y_train)
    resultado = clf.score(X_test, y_test, sample_weight=None)
    print(nome_modelo, ":", resultado)
DummyClassifier : 0.5111940298507462
LogisticRegression : 0.8097014925373134
DecisionTreeClassifier : 0.7574626865671642
KNeighborsClassifier : 0.7985074626865671
GaussianNB : 0.5970149253731343
RandomForestClassifier : 0.7910447761194029
SVC : 0.8171641791044776
C:\Users\gebra\Anaconda3\envs\ds\lib\site-packages\sklearn\dummy.py:132: FutureWarning: The default value of strategy will change from stratified to prior in 0.24.
  "stratified to prior in 0.24.", FutureWarning)
In [182]:
# Confusion matrix for each model on the test set (rows = true class,
# columns = predicted class). Same zip-based pairing as the accuracy loop.
for nome_modelo, modelo in zip(modelos2, modelos):
    clf = modelo()
    clf.fit(X_train, y_train)
    prev = clf.predict(X_test)
    print(nome_modelo, ":", "\n", confusion_matrix(y_test, prev), "\n")
DummyClassifier : 
 [[87 70]
 [73 38]] 

LogisticRegression : 
 [[133  24]
 [ 27  84]] 

DecisionTreeClassifier : 
 [[129  28]
 [ 32  79]] 

KNeighborsClassifier : 
 [[138  19]
 [ 35  76]] 

GaussianNB : 
 [[157   0]
 [108   3]] 

RandomForestClassifier : 
 [[130  27]
 [ 29  82]] 

SVC : 
 [[144  13]
 [ 36  75]] 

C:\Users\gebra\Anaconda3\envs\ds\lib\site-packages\sklearn\dummy.py:132: FutureWarning: The default value of strategy will change from stratified to prior in 0.24.
  "stratified to prior in 0.24.", FutureWarning)

Curva ROC (graficamente) do segundo melhor modelo

Nesse caso foi utilizado o segundo modelo com a melhor acurácia, pois o SVC deu erro ao treinar com a biblioteca yellowbrick

In [183]:
# ROC curve for the second-best model (LogisticRegression). SVC reportedly
# errored with yellowbrick's ROCAUC, so the runner-up by accuracy is shown.
# (Fix: removed the unused `load_spam` import and the re-imports of
# LogisticRegression/train_test_split/ROCAUC, which are already imported
# in the notebook's top import cell.)
model = LogisticRegression()
visualizer = ROCAUC(model, classes=["Morreu", "Nao Morreu"])

visualizer.fit(X_train, y_train)        # Fit the training data to the visualizer
visualizer.score(X_test, y_test)        # Evaluate the model on the test data
visualizer.show()                       # Finalize and show the figure
C:\Users\gebra\Anaconda3\envs\ds\lib\site-packages\sklearn\base.py:213: FutureWarning: From version 0.24, get_params will raise an AttributeError if a parameter cannot be retrieved as an instance attribute. Previously it would return None.
  FutureWarning)
Out[183]:
<AxesSubplot:title={'center':'ROC Curves for LogisticRegression'}, xlabel='False Positive Rate', ylabel='True Positive Rate'>

Extra

Esta parte será dedicada a criar uma visualização e comparar os diferentes tipos de algoritmos graficamente.

In [184]:
# True test labels as a NumPy array, for positional comparison below.
testarray = np.asarray(y_test)
In [185]:
# Fit a baseline DummyClassifier and predict on the test set.
# (Fix: the original assigned the instance to the name `DummyClassifier`,
# shadowing the imported class; use a distinct instance name.)
dummy_clf = DummyClassifier()
dummy_clf.fit(X_train, y_train)
prev_DummyClassifier = dummy_clf.predict(X_test)
C:\Users\gebra\Anaconda3\envs\ds\lib\site-packages\sklearn\dummy.py:132: FutureWarning: The default value of strategy will change from stratified to prior in 0.24.
  "stratified to prior in 0.24.", FutureWarning)
In [186]:
# 0.2 where DummyClassifier's prediction matches the true label, None
# otherwise — None leaves gaps so matplotlib plots only the hits at y=0.2.
grap_DummyClassifier = [
    0.2 if pred == real else None
    for pred, real in zip(prev_DummyClassifier, testarray)
]
In [187]:
# Fit LogisticRegression and predict on the test set.
# (Fix: avoid shadowing the imported class name with the instance.)
logreg_clf = LogisticRegression()
logreg_clf.fit(X_train, y_train)
prev_LogisticRegression = logreg_clf.predict(X_test)
In [188]:
# 0.4 where LogisticRegression predicted correctly, None otherwise.
grap_LogisticRegression = [
    0.4 if pred == real else None
    for pred, real in zip(prev_LogisticRegression, testarray)
]
In [189]:
# Fit DecisionTreeClassifier and predict on the test set.
# (Fix: avoid shadowing the imported class name with the instance.)
tree_clf = DecisionTreeClassifier()
tree_clf.fit(X_train, y_train)
prev_DecisionTreeClassifier = tree_clf.predict(X_test)
In [190]:
# 0.6 where DecisionTreeClassifier predicted correctly, None otherwise.
grap_DecisionTreeClassifier = [
    0.6 if pred == real else None
    for pred, real in zip(prev_DecisionTreeClassifier, testarray)
]
In [191]:
# Fit KNeighborsClassifier and predict on the test set.
# (Fix: avoid shadowing the imported class name with the instance.)
knn_clf = KNeighborsClassifier()
knn_clf.fit(X_train, y_train)
prev_KNeighborsClassifier = knn_clf.predict(X_test)
In [192]:
# 0.8 where KNeighborsClassifier predicted correctly, None otherwise.
grap_KNeighborsClassifier = [
    0.8 if pred == real else None
    for pred, real in zip(prev_KNeighborsClassifier, testarray)
]
In [193]:
# Fit GaussianNB and predict on the test set.
# (Fix: avoid shadowing the imported class name with the instance.)
gnb_clf = GaussianNB()
gnb_clf.fit(X_train, y_train)
prev_GaussianNB = gnb_clf.predict(X_test)
In [194]:
# 1 where GaussianNB predicted correctly, None otherwise.
grap_GaussianNB = [
    1 if pred == real else None
    for pred, real in zip(prev_GaussianNB, testarray)
]
In [195]:
# Fit RandomForestClassifier and predict on the test set.
# (Fix: avoid shadowing the imported class name with the instance.)
rf_clf = RandomForestClassifier()
rf_clf.fit(X_train, y_train)
prev_RandomForestClassifier = rf_clf.predict(X_test)
In [196]:
# 1.2 where RandomForestClassifier predicted correctly, None otherwise.
grap_RandomForestClassifier = [
    1.2 if pred == real else None
    for pred, real in zip(prev_RandomForestClassifier, testarray)
]
In [197]:
# Fit SVC and predict on the test set.
# (Fix: avoid shadowing the imported class name with the instance.)
svc_clf = SVC()
svc_clf.fit(X_train, y_train)
prev_SVC = svc_clf.predict(X_test)
In [198]:
# 1.4 where SVC predicted correctly, None otherwise.
grap_SVC = [
    1.4 if pred == real else None
    for pred, real in zip(prev_SVC, testarray)
]
In [199]:
# 1.6 at positions where EVERY model predicted correctly (all series have a
# non-None value), None otherwise — plotted later as a star marker.
todas = zip(grap_DummyClassifier, grap_LogisticRegression,
            grap_DecisionTreeClassifier, grap_KNeighborsClassifier,
            grap_GaussianNB, grap_RandomForestClassifier, grap_SVC)
Estrela = [1.6 if all(v is not None for v in vals) else None for vals in todas]
In [200]:
# Scatter of correct predictions per model: each model is drawn at its own
# fixed height, with stars where all seven models agree with the truth.
plt.figure(figsize=(20,10))
x = np.arange(len(grap_SVC))  # one point per test sample (was hard-coded 268)
plt.plot(x, grap_DummyClassifier, 'bo', label='DummyClassifier')
plt.plot(x, grap_LogisticRegression, 'ro', label='LogisticRegression')
plt.plot(x, grap_DecisionTreeClassifier, 'go', label='DecisionTreeClassifier')
plt.plot(x, grap_KNeighborsClassifier, 'co', label='KNeighborsClassifier')
plt.plot(x, grap_GaussianNB, 'yo', label='GaussianNB')
plt.plot(x, grap_RandomForestClassifier, 'mo', label='RandomForestClassifier')
plt.plot(x, grap_SVC, 'ko', label='SVC')
plt.plot(x, Estrela, 'y*', label='Todos Acertaram', markersize=12)
plt.title('Comparacao entre os modelos')
plt.legend(loc='upper center', bbox_to_anchor=(0.5, 0))
# NOTE(review): rcParams is a GLOBAL setting and is applied after the figure
# is built — it mainly affects later renders; set it before plotting instead.
plt.rcParams.update({'font.size': 30})
In [201]:
 
ERROR: Could not find a version that satisfies the requirement xelatex (from versions: none)
ERROR: No matching distribution found for xelatex
In [ ]: